CONTEXT: A communications equipment manufacturing company has a product which is responsible for emitting informative signals. Company wants to build a machine learning model which can help the company to predict the equipment’s signal quality using various parameters.
DATA DESCRIPTION: The data set contains information on various signal tests performed:
PROJECT OBJECTIVE: To build a classifier which can use the given parameters to determine the signal strength or quality.
# Import Libraries
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
from sklearn.model_selection import train_test_split
# Ignore Warnings
#import warnings
#warnings.filterwarnings("ignore")
# from google.colab import drive
# drive.mount('/content/drive',force_remount=True)
# Read the csv file and store in a dataframe
# data = pd.read_csv('/content/drive/My Drive/PGP: AIML University Of Texas/Assignment - Introduction to Neural Networks/NN Project Data - Signal.csv')
data = pd.read_csv('NN Project Data - Signal.csv')
# Show data header
data.head()
| Parameter 1 | Parameter 2 | Parameter 3 | Parameter 4 | Parameter 5 | Parameter 6 | Parameter 7 | Parameter 8 | Parameter 9 | Parameter 10 | Parameter 11 | Signal_Strength | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 7.4 | 0.70 | 0.00 | 1.9 | 0.076 | 11.0 | 34.0 | 0.9978 | 3.51 | 0.56 | 9.4 | 5 |
| 1 | 7.8 | 0.88 | 0.00 | 2.6 | 0.098 | 25.0 | 67.0 | 0.9968 | 3.20 | 0.68 | 9.8 | 5 |
| 2 | 7.8 | 0.76 | 0.04 | 2.3 | 0.092 | 15.0 | 54.0 | 0.9970 | 3.26 | 0.65 | 9.8 | 5 |
| 3 | 11.2 | 0.28 | 0.56 | 1.9 | 0.075 | 17.0 | 60.0 | 0.9980 | 3.16 | 0.58 | 9.8 | 6 |
| 4 | 7.4 | 0.70 | 0.00 | 1.9 | 0.076 | 11.0 | 34.0 | 0.9978 | 3.51 | 0.56 | 9.4 | 5 |
data.info()
<class 'pandas.core.frame.DataFrame'> RangeIndex: 1599 entries, 0 to 1598 Data columns (total 12 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Parameter 1 1599 non-null float64 1 Parameter 2 1599 non-null float64 2 Parameter 3 1599 non-null float64 3 Parameter 4 1599 non-null float64 4 Parameter 5 1599 non-null float64 5 Parameter 6 1599 non-null float64 6 Parameter 7 1599 non-null float64 7 Parameter 8 1599 non-null float64 8 Parameter 9 1599 non-null float64 9 Parameter 10 1599 non-null float64 10 Parameter 11 1599 non-null float64 11 Signal_Strength 1599 non-null int64 dtypes: float64(11), int64(1) memory usage: 150.0 KB
# Check for missing values
data.isnull().sum()
Parameter 1 0 Parameter 2 0 Parameter 3 0 Parameter 4 0 Parameter 5 0 Parameter 6 0 Parameter 7 0 Parameter 8 0 Parameter 9 0 Parameter 10 0 Parameter 11 0 Signal_Strength 0 dtype: int64
# Check for missing values percentage
data.isnull().sum()*100/len(data)
Parameter 1 0.0 Parameter 2 0.0 Parameter 3 0.0 Parameter 4 0.0 Parameter 5 0.0 Parameter 6 0.0 Parameter 7 0.0 Parameter 8 0.0 Parameter 9 0.0 Parameter 10 0.0 Parameter 11 0.0 Signal_Strength 0.0 dtype: float64
# Checking duplicate rows in the dataframe
data[data.duplicated()]
| Parameter 1 | Parameter 2 | Parameter 3 | Parameter 4 | Parameter 5 | Parameter 6 | Parameter 7 | Parameter 8 | Parameter 9 | Parameter 10 | Parameter 11 | Signal_Strength | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 4 | 7.4 | 0.700 | 0.00 | 1.90 | 0.076 | 11.0 | 34.0 | 0.99780 | 3.51 | 0.56 | 9.4 | 5 |
| 11 | 7.5 | 0.500 | 0.36 | 6.10 | 0.071 | 17.0 | 102.0 | 0.99780 | 3.35 | 0.80 | 10.5 | 5 |
| 27 | 7.9 | 0.430 | 0.21 | 1.60 | 0.106 | 10.0 | 37.0 | 0.99660 | 3.17 | 0.91 | 9.5 | 5 |
| 40 | 7.3 | 0.450 | 0.36 | 5.90 | 0.074 | 12.0 | 87.0 | 0.99780 | 3.33 | 0.83 | 10.5 | 5 |
| 65 | 7.2 | 0.725 | 0.05 | 4.65 | 0.086 | 4.0 | 11.0 | 0.99620 | 3.41 | 0.39 | 10.9 | 5 |
| ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... |
| 1563 | 7.2 | 0.695 | 0.13 | 2.00 | 0.076 | 12.0 | 20.0 | 0.99546 | 3.29 | 0.54 | 10.1 | 5 |
| 1564 | 7.2 | 0.695 | 0.13 | 2.00 | 0.076 | 12.0 | 20.0 | 0.99546 | 3.29 | 0.54 | 10.1 | 5 |
| 1567 | 7.2 | 0.695 | 0.13 | 2.00 | 0.076 | 12.0 | 20.0 | 0.99546 | 3.29 | 0.54 | 10.1 | 5 |
| 1581 | 6.2 | 0.560 | 0.09 | 1.70 | 0.053 | 24.0 | 32.0 | 0.99402 | 3.54 | 0.60 | 11.3 | 5 |
| 1596 | 6.3 | 0.510 | 0.13 | 2.30 | 0.076 | 29.0 | 40.0 | 0.99574 | 3.42 | 0.75 | 11.0 | 6 |
240 rows × 12 columns
# dropping and checking again duplicate values
data.drop_duplicates(keep=False,inplace=True)
data[data.duplicated()]
| Parameter 1 | Parameter 2 | Parameter 3 | Parameter 4 | Parameter 5 | Parameter 6 | Parameter 7 | Parameter 8 | Parameter 9 | Parameter 10 | Parameter 11 | Signal_Strength |
|---|
sns.displot(data['Signal_Strength']);
sns.countplot(data['Signal_Strength']);
C:\Users\harit\anaconda3\lib\site-packages\seaborn\_decorators.py:36: FutureWarning: Pass the following variable as a keyword arg: x. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation. warnings.warn(
sns.boxplot(data['Signal_Strength']);
C:\Users\harit\anaconda3\lib\site-packages\seaborn\_decorators.py:36: FutureWarning: Pass the following variable as a keyword arg: x. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation. warnings.warn(
# Install and Import Required Library
!pip install ydata-profiling
from ydata_profiling import ProfileReport
# Generate Data Profile Report
profile = ProfileReport(data, title="Equipment Signal Quality Analysis")
profile
Requirement already satisfied: ydata-profiling in c:\users\harit\anaconda3\lib\site-packages (4.12.2) Requirement already satisfied: dacite>=1.8 in c:\users\harit\anaconda3\lib\site-packages (from ydata-profiling) (1.9.2) Requirement already satisfied: pydantic>=2 in c:\users\harit\anaconda3\lib\site-packages (from ydata-profiling) (2.10.6) Requirement already satisfied: numpy<2.2,>=1.16.0 in c:\users\harit\anaconda3\lib\site-packages (from ydata-profiling) (1.22.4) Requirement already satisfied: phik<0.13,>=0.11.1 in c:\users\harit\anaconda3\lib\site-packages (from ydata-profiling) (0.12.4) Requirement already satisfied: htmlmin==0.1.12 in c:\users\harit\anaconda3\lib\site-packages (from ydata-profiling) (0.1.12) Requirement already satisfied: visions[type_image_path]<0.8.0,>=0.7.5 in c:\users\harit\anaconda3\lib\site-packages (from ydata-profiling) (0.7.5) Requirement already satisfied: pandas!=1.4.0,<3,>1.1 in c:\users\harit\anaconda3\lib\site-packages (from ydata-profiling) (1.2.4) Requirement already satisfied: statsmodels<1,>=0.13.2 in c:\users\harit\anaconda3\lib\site-packages (from ydata-profiling) (0.14.1) Requirement already satisfied: jinja2<3.2,>=2.11.1 in c:\users\harit\anaconda3\lib\site-packages (from ydata-profiling) (2.11.3) Requirement already satisfied: requests<3,>=2.24.0 in c:\users\harit\anaconda3\lib\site-packages (from ydata-profiling) (2.25.1) Requirement already satisfied: multimethod<2,>=1.4 in c:\users\harit\anaconda3\lib\site-packages (from ydata-profiling) (1.10) Requirement already satisfied: imagehash==4.3.1 in c:\users\harit\anaconda3\lib\site-packages (from ydata-profiling) (4.3.1) Requirement already satisfied: wordcloud>=1.9.3 in c:\users\harit\anaconda3\lib\site-packages (from ydata-profiling) (1.9.4) Requirement already satisfied: seaborn<0.14,>=0.10.1 in c:\users\harit\anaconda3\lib\site-packages (from ydata-profiling) (0.11.1) Requirement already satisfied: matplotlib>=3.5 in c:\users\harit\anaconda3\lib\site-packages (from ydata-profiling) (3.7.3) Requirement already satisfied: tqdm<5,>=4.48.2 in c:\users\harit\anaconda3\lib\site-packages (from ydata-profiling) (4.59.0) Requirement already satisfied: typeguard<5,>=3 in c:\users\harit\anaconda3\lib\site-packages (from ydata-profiling) (4.4.0) Requirement already satisfied: scipy<1.16,>=1.4.1 in c:\users\harit\anaconda3\lib\site-packages (from ydata-profiling) (1.10.1) Requirement already satisfied: PyYAML<6.1,>=5.0.0 in c:\users\harit\anaconda3\lib\site-packages (from ydata-profiling) (5.4.1) Requirement already satisfied: PyWavelets in c:\users\harit\anaconda3\lib\site-packages (from imagehash==4.3.1->ydata-profiling) (1.1.1) Requirement already satisfied: pillow in c:\users\harit\anaconda3\lib\site-packages (from imagehash==4.3.1->ydata-profiling) (8.2.0) Requirement already satisfied: MarkupSafe>=0.23 in c:\users\harit\anaconda3\lib\site-packages (from jinja2<3.2,>=2.11.1->ydata-profiling) (1.1.1) Requirement already satisfied: cycler>=0.10 in c:\users\harit\anaconda3\lib\site-packages (from matplotlib>=3.5->ydata-profiling) (0.10.0) Requirement already satisfied: importlib-resources>=3.2.0 in c:\users\harit\anaconda3\lib\site-packages (from matplotlib>=3.5->ydata-profiling) (6.4.5) Requirement already satisfied: kiwisolver>=1.0.1 in c:\users\harit\anaconda3\lib\site-packages (from matplotlib>=3.5->ydata-profiling) (1.3.1) Requirement already satisfied: python-dateutil>=2.7 in c:\users\harit\anaconda3\lib\site-packages (from matplotlib>=3.5->ydata-profiling) (2.8.1) Requirement already satisfied: contourpy>=1.0.1 in c:\users\harit\anaconda3\lib\site-packages (from matplotlib>=3.5->ydata-profiling) (1.1.1) Requirement already satisfied: fonttools>=4.22.0 in c:\users\harit\anaconda3\lib\site-packages (from matplotlib>=3.5->ydata-profiling) (4.55.3) Requirement already satisfied: packaging>=20.0 in c:\users\harit\anaconda3\lib\site-packages (from matplotlib>=3.5->ydata-profiling) (24.2) Requirement already satisfied: pyparsing>=2.3.1 in c:\users\harit\anaconda3\lib\site-packages (from matplotlib>=3.5->ydata-profiling) (2.4.7) Requirement already satisfied: six in c:\users\harit\anaconda3\lib\site-packages (from cycler>=0.10->matplotlib>=3.5->ydata-profiling) (1.15.0) Requirement already satisfied: zipp>=3.1.0 in c:\users\harit\anaconda3\lib\site-packages (from importlib-resources>=3.2.0->matplotlib>=3.5->ydata-profiling) (3.20.2) Requirement already satisfied: pytz>=2017.3 in c:\users\harit\anaconda3\lib\site-packages (from pandas!=1.4.0,<3,>1.1->ydata-profiling) (2021.1) Requirement already satisfied: joblib>=0.14.1 in c:\users\harit\anaconda3\lib\site-packages (from phik<0.13,>=0.11.1->ydata-profiling) (1.4.2) Requirement already satisfied: typing-extensions>=4.12.2 in c:\users\harit\anaconda3\lib\site-packages (from pydantic>=2->ydata-profiling) (4.12.2) Requirement already satisfied: pydantic-core==2.27.2 in c:\users\harit\anaconda3\lib\site-packages (from pydantic>=2->ydata-profiling) (2.27.2) Requirement already satisfied: annotated-types>=0.6.0 in c:\users\harit\anaconda3\lib\site-packages (from pydantic>=2->ydata-profiling) (0.7.0) Requirement already satisfied: urllib3<1.27,>=1.21.1 in c:\users\harit\anaconda3\lib\site-packages (from requests<3,>=2.24.0->ydata-profiling) (1.26.4) Requirement already satisfied: idna<3,>=2.5 in c:\users\harit\anaconda3\lib\site-packages (from requests<3,>=2.24.0->ydata-profiling) (2.10) Requirement already satisfied: chardet<5,>=3.0.2 in c:\users\harit\anaconda3\lib\site-packages (from requests<3,>=2.24.0->ydata-profiling) (4.0.0) Requirement already satisfied: certifi>=2017.4.17 in c:\users\harit\anaconda3\lib\site-packages (from requests<3,>=2.24.0->ydata-profiling) (2020.12.5) Requirement already satisfied: patsy>=0.5.4 in c:\users\harit\anaconda3\lib\site-packages (from statsmodels<1,>=0.13.2->ydata-profiling) (1.0.1) Requirement already satisfied: importlib-metadata>=3.6 in c:\users\harit\anaconda3\lib\site-packages (from typeguard<5,>=3->ydata-profiling) (8.5.0) Requirement already satisfied: tangled-up-in-unicode>=0.0.4 in c:\users\harit\anaconda3\lib\site-packages (from visions[type_image_path]<0.8.0,>=0.7.5->ydata-profiling) (0.2.0) Requirement already satisfied: attrs>=19.3.0 in c:\users\harit\anaconda3\lib\site-packages (from visions[type_image_path]<0.8.0,>=0.7.5->ydata-profiling) (20.3.0) Requirement already satisfied: networkx>=2.4 in c:\users\harit\anaconda3\lib\site-packages (from visions[type_image_path]<0.8.0,>=0.7.5->ydata-profiling) (2.5) Requirement already satisfied: decorator>=4.3.0 in c:\users\harit\anaconda3\lib\site-packages (from networkx>=2.4->visions[type_image_path]<0.8.0,>=0.7.5->ydata-profiling) (5.0.6)
# Let independent variables be all the parameters
x_DataSet = data.drop(columns='Signal_Strength', axis=1)
x_DataSet.head()
| Parameter 1 | Parameter 2 | Parameter 3 | Parameter 4 | Parameter 5 | Parameter 6 | Parameter 7 | Parameter 8 | Parameter 9 | Parameter 10 | Parameter 11 | |
|---|---|---|---|---|---|---|---|---|---|---|---|
| 1 | 7.8 | 0.88 | 0.00 | 2.6 | 0.098 | 25.0 | 67.0 | 0.9968 | 3.20 | 0.68 | 9.8 |
| 2 | 7.8 | 0.76 | 0.04 | 2.3 | 0.092 | 15.0 | 54.0 | 0.9970 | 3.26 | 0.65 | 9.8 |
| 3 | 11.2 | 0.28 | 0.56 | 1.9 | 0.075 | 17.0 | 60.0 | 0.9980 | 3.16 | 0.58 | 9.8 |
| 5 | 7.4 | 0.66 | 0.00 | 1.8 | 0.075 | 13.0 | 40.0 | 0.9978 | 3.51 | 0.56 | 9.4 |
| 6 | 7.9 | 0.60 | 0.06 | 1.6 | 0.069 | 15.0 | 59.0 | 0.9964 | 3.30 | 0.46 | 9.4 |
y_DataSet = data['Signal_Strength']
y_DataSet
1 5
2 5
3 6
5 5
6 5
..
1593 6
1594 5
1595 6
1597 5
1598 6
Name: Signal_Strength, Length: 1139, dtype: int64
X_train, X_test, y_train, y_test = train_test_split(x_DataSet, y_DataSet, test_size=0.3, random_state=1)
X_train.head()
| Parameter 1 | Parameter 2 | Parameter 3 | Parameter 4 | Parameter 5 | Parameter 6 | Parameter 7 | Parameter 8 | Parameter 9 | Parameter 10 | Parameter 11 | |
|---|---|---|---|---|---|---|---|---|---|---|---|
| 525 | 10.4 | 0.64 | 0.24 | 2.8 | 0.105 | 29.0 | 53.0 | 0.99980 | 3.24 | 0.67 | 9.9 |
| 1004 | 8.2 | 0.43 | 0.29 | 1.6 | 0.081 | 27.0 | 45.0 | 0.99603 | 3.25 | 0.54 | 10.3 |
| 58 | 7.8 | 0.59 | 0.18 | 2.3 | 0.076 | 17.0 | 54.0 | 0.99750 | 3.43 | 0.59 | 10.0 |
| 1317 | 9.9 | 0.44 | 0.46 | 2.2 | 0.091 | 10.0 | 41.0 | 0.99638 | 3.18 | 0.69 | 11.9 |
| 894 | 7.2 | 0.63 | 0.03 | 2.2 | 0.080 | 17.0 | 88.0 | 0.99745 | 3.53 | 0.58 | 9.8 |
print('Shape of Train Data of Independent Variables:',X_train.shape)
print('Shape of Train Data of Target Variables:',y_train.shape)
print('Shape of Test Data of Independent Variables:',X_test.shape)
print('Shape of Test Data of Target Variables:',y_test.shape)
Shape of Train Data of Independent Variables: (797, 11) Shape of Train Data of Target Variables: (797,) Shape of Test Data of Independent Variables: (342, 11) Shape of Test Data of Target Variables: (342,)
# Import necessary module
from sklearn.preprocessing import MinMaxScaler
# Initialize the MinMaxScaler
scaler = MinMaxScaler()
# Apply scaling to the training data
X_train_scaled = scaler.fit_transform(X_train)
# Display the transformed data
X_train_scaled
array([[0.51327434, 0.41025641, 0.30379747, ..., 0.33043478, 0.20606061,
0.23076923],
[0.31858407, 0.23076923, 0.36708861, ..., 0.33913043, 0.12727273,
0.29230769],
[0.28318584, 0.36752137, 0.2278481 , ..., 0.49565217, 0.15757576,
0.24615385],
...,
[0.57522124, 0.23931624, 0.53164557, ..., 0.33913043, 0.14545455,
0.30769231],
[0.3539823 , 0.26495726, 0.37974684, ..., 0.3826087 , 0.12121212,
0.15384615],
[0.23893805, 0.36324786, 0.2278481 , ..., 0.39130435, 0.12727273,
0.21538462]])
# Scale the test dataset using the fitted scaler
X_test_scaled = scaler.transform(X_test)
# Output the scaled test data
X_test_scaled
array([[0.34513274, 0.23931624, 0.63291139, ..., 0.13043478, 0.46666667,
0.15384615],
[0.24778761, 0.16239316, 0.41772152, ..., 0.43478261, 0.16363636,
0.53846154],
[0.48672566, 0.09401709, 0.6835443 , ..., 0.26956522, 0.12121212,
0.63076923],
...,
[0.21238938, 0.23076923, 0.02531646, ..., 0.42608696, 0.29090909,
0.33846154],
[0.20353982, 0.24786325, 0.13924051, ..., 0.3826087 , 0.19393939,
0.46153846],
[0.36283186, 0.51709402, 0.27848101, ..., 0.20869565, 0.13333333,
0.15384615]])
# Retrieve unique values of the target variable 'Signal_Strength'
unique_values = y_DataSet.unique()
# Display unique values
unique_values
array([5, 6, 7, 4, 8, 3], dtype=int64)
# Get the unique values present in the training target variable
unique_train_values = y_train.unique()
# Display the unique values
unique_train_values
array([5, 6, 7, 3, 4, 8], dtype=int64)
# Import necessary module for one-hot encoding
from keras.utils import to_categorical
# Convert integer labels into one-hot encoded format
y_train_one_hot = to_categorical(y_train)
# Display the transformed labels
y_train_one_hot
array([[0., 0., 0., ..., 0., 0., 0.],
[0., 0., 0., ..., 0., 0., 0.],
[0., 0., 0., ..., 0., 0., 0.],
...,
[0., 0., 0., ..., 1., 0., 0.],
[0., 0., 0., ..., 0., 0., 0.],
[0., 0., 0., ..., 0., 0., 0.]], dtype=float32)
# Print an example label from the training set
print("Original Label:", y_train.iloc[1])
# Print the corresponding one-hot encoded label
print("One-Hot Encoded:", y_train_one_hot[0])
Original Label: 5 One-Hot Encoded: [0. 0. 0. 0. 0. 1. 0. 0. 0.]
# Display unique values in the test target variable
print("Unique values in y_test:", y_test.unique())
Unique values in y_test: [5 6 7 8 4 3]
# Convert test target labels into one-hot encoded format
dummy_y_test = to_categorical(y_test)
# Display the transformed labels
dummy_y_test
array([[0., 0., 0., ..., 0., 0., 0.],
[0., 0., 0., ..., 1., 0., 0.],
[0., 0., 0., ..., 1., 0., 0.],
...,
[0., 0., 0., ..., 1., 0., 0.],
[0., 0., 0., ..., 1., 0., 0.],
[0., 0., 0., ..., 0., 0., 0.]], dtype=float32)
print(dummy_y_test[0])
[0. 0. 0. 0. 0. 1. 0. 0. 0.]
X_train_scaled.shape
(797, 11)
# Import necessary modules
import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import SGD
from sklearn.metrics import (
accuracy_score, confusion_matrix, precision_score,
recall_score, f1_score, precision_recall_curve, auc
)
# Initialize the neural network
model = Sequential()
# Input layer with 11 features and 10 neurons
model.add(Dense(units=10, activation='relu', kernel_initializer='he_uniform', input_shape=(11,)))
# Hidden layer with 10 neurons and sigmoid activation
model.add(Dense(units=10, activation='sigmoid', kernel_initializer='glorot_uniform'))
# Output layer with 9 neurons for classification
model.add(Dense(units=9, activation='softmax', kernel_initializer='glorot_uniform'))
# Compile the model using SGD optimizer and categorical crossentropy loss
model.compile(optimizer=SGD(), loss='categorical_crossentropy', metrics=['accuracy'])
# Display model architecture
model.summary()
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
dense (Dense) (None, 10) 120
dense_1 (Dense) (None, 10) 110
dense_2 (Dense) (None, 9) 99
=================================================================
Total params: 329
Trainable params: 329
Non-trainable params: 0
_________________________________________________________________
# Train the model
training_model = model.fit(
X_train_scaled, y_train_one_hot,
validation_data=(X_test_scaled, dummy_y_test),
epochs=100,
batch_size=32,
verbose=1
)
Epoch 1/100 25/25 [==============================] - 1s 10ms/step - loss: 2.5410 - accuracy: 0.0966 - val_loss: 2.3879 - val_accuracy: 0.1228 Epoch 2/100 25/25 [==============================] - 0s 3ms/step - loss: 2.2790 - accuracy: 0.1205 - val_loss: 2.1533 - val_accuracy: 0.1228 Epoch 3/100 25/25 [==============================] - 0s 3ms/step - loss: 2.0664 - accuracy: 0.2923 - val_loss: 1.9654 - val_accuracy: 0.4152 Epoch 4/100 25/25 [==============================] - 0s 4ms/step - loss: 1.8979 - accuracy: 0.4228 - val_loss: 1.8175 - val_accuracy: 0.4181 Epoch 5/100 25/25 [==============================] - 0s 4ms/step - loss: 1.7655 - accuracy: 0.4228 - val_loss: 1.7021 - val_accuracy: 0.4181 Epoch 6/100 25/25 [==============================] - 0s 6ms/step - loss: 1.6629 - accuracy: 0.4228 - val_loss: 1.6125 - val_accuracy: 0.4181 Epoch 7/100 25/25 [==============================] - 0s 7ms/step - loss: 1.5834 - accuracy: 0.4228 - val_loss: 1.5435 - val_accuracy: 0.4181 Epoch 8/100 25/25 [==============================] - 0s 5ms/step - loss: 1.5223 - accuracy: 0.4228 - val_loss: 1.4905 - val_accuracy: 0.4181 Epoch 9/100 25/25 [==============================] - 0s 4ms/step - loss: 1.4750 - accuracy: 0.4228 - val_loss: 1.4494 - val_accuracy: 0.4181 Epoch 10/100 25/25 [==============================] - 0s 6ms/step - loss: 1.4385 - accuracy: 0.4228 - val_loss: 1.4179 - val_accuracy: 0.4181 Epoch 11/100 25/25 [==============================] - 0s 4ms/step - loss: 1.4104 - accuracy: 0.4228 - val_loss: 1.3934 - val_accuracy: 0.4181 Epoch 12/100 25/25 [==============================] - 0s 5ms/step - loss: 1.3881 - accuracy: 0.4228 - val_loss: 1.3739 - val_accuracy: 0.4181 Epoch 13/100 25/25 [==============================] - 0s 4ms/step - loss: 1.3705 - accuracy: 0.4228 - val_loss: 1.3585 - val_accuracy: 0.4181 Epoch 14/100 25/25 [==============================] - 0s 3ms/step - loss: 1.3562 - accuracy: 0.4228 - val_loss: 1.3462 - val_accuracy: 0.4152 Epoch 15/100 25/25 [==============================] - 0s 3ms/step - loss: 1.3445 - accuracy: 0.4228 - val_loss: 1.3359 - val_accuracy: 0.4152 Epoch 16/100 25/25 [==============================] - 0s 3ms/step - loss: 1.3350 - accuracy: 0.4203 - val_loss: 1.3275 - val_accuracy: 0.4152 Epoch 17/100 25/25 [==============================] - 0s 3ms/step - loss: 1.3267 - accuracy: 0.4191 - val_loss: 1.3203 - val_accuracy: 0.4152 Epoch 18/100 25/25 [==============================] - 0s 3ms/step - loss: 1.3195 - accuracy: 0.4203 - val_loss: 1.3141 - val_accuracy: 0.4152 Epoch 19/100 25/25 [==============================] - 0s 4ms/step - loss: 1.3135 - accuracy: 0.4228 - val_loss: 1.3088 - val_accuracy: 0.4181 Epoch 20/100 25/25 [==============================] - 0s 3ms/step - loss: 1.3080 - accuracy: 0.4216 - val_loss: 1.3041 - val_accuracy: 0.4152 Epoch 21/100 25/25 [==============================] - 0s 3ms/step - loss: 1.3033 - accuracy: 0.4228 - val_loss: 1.3000 - val_accuracy: 0.4123 Epoch 22/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2988 - accuracy: 0.4203 - val_loss: 1.2964 - val_accuracy: 0.4123 Epoch 23/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2951 - accuracy: 0.4216 - val_loss: 1.2931 - val_accuracy: 0.4123 Epoch 24/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2915 - accuracy: 0.4216 - val_loss: 1.2901 - val_accuracy: 0.4123 Epoch 25/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2884 - accuracy: 0.4203 - val_loss: 1.2875 - val_accuracy: 0.4123 Epoch 26/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2854 - accuracy: 0.4203 - val_loss: 1.2851 - val_accuracy: 0.4123 Epoch 27/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2826 - accuracy: 0.4216 - val_loss: 1.2828 - val_accuracy: 0.4123 Epoch 28/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2803 - accuracy: 0.4216 - val_loss: 1.2808 - val_accuracy: 0.4123 Epoch 29/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2778 - accuracy: 0.4216 - val_loss: 1.2789 - val_accuracy: 0.4123 Epoch 30/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2756 - accuracy: 0.4216 - val_loss: 1.2772 - val_accuracy: 0.4123 Epoch 31/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2736 - accuracy: 0.4216 - val_loss: 1.2756 - val_accuracy: 0.4123 Epoch 32/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2716 - accuracy: 0.4216 - val_loss: 1.2741 - val_accuracy: 0.4123 Epoch 33/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2698 - accuracy: 0.4228 - val_loss: 1.2727 - val_accuracy: 0.4094 Epoch 34/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2681 - accuracy: 0.4216 - val_loss: 1.2714 - val_accuracy: 0.4123 Epoch 35/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2666 - accuracy: 0.4203 - val_loss: 1.2703 - val_accuracy: 0.4094 Epoch 36/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2651 - accuracy: 0.4191 - val_loss: 1.2691 - val_accuracy: 0.4094 Epoch 37/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2638 - accuracy: 0.4203 - val_loss: 1.2681 - val_accuracy: 0.4094 Epoch 38/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2622 - accuracy: 0.4191 - val_loss: 1.2671 - val_accuracy: 0.4094 Epoch 39/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2609 - accuracy: 0.4216 - val_loss: 1.2662 - val_accuracy: 0.4094 Epoch 40/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2597 - accuracy: 0.4216 - val_loss: 1.2652 - val_accuracy: 0.4152 Epoch 41/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2586 - accuracy: 0.4216 - val_loss: 1.2644 - val_accuracy: 0.4094 Epoch 42/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2574 - accuracy: 0.4228 - val_loss: 1.2637 - val_accuracy: 0.4094 Epoch 43/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2564 - accuracy: 0.4228 - val_loss: 1.2629 - val_accuracy: 0.4094 Epoch 44/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2554 - accuracy: 0.4241 - val_loss: 1.2622 - val_accuracy: 0.4094 Epoch 45/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2542 - accuracy: 0.4241 - val_loss: 1.2615 - val_accuracy: 0.4094 Epoch 46/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2534 - accuracy: 0.4203 - val_loss: 1.2609 - val_accuracy: 0.4094 Epoch 47/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2524 - accuracy: 0.4191 - val_loss: 1.2603 - val_accuracy: 0.4094 Epoch 48/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2518 - accuracy: 0.4228 - val_loss: 1.2597 - val_accuracy: 0.4152 Epoch 49/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2509 - accuracy: 0.4241 - val_loss: 1.2591 - val_accuracy: 0.4123 Epoch 50/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2498 - accuracy: 0.4203 - val_loss: 1.2586 - val_accuracy: 0.4123 Epoch 51/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2492 - accuracy: 0.4279 - val_loss: 1.2581 - val_accuracy: 0.4094 Epoch 52/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2483 - accuracy: 0.4228 - val_loss: 1.2576 - val_accuracy: 0.4152 Epoch 53/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2479 - accuracy: 0.4203 - val_loss: 1.2571 - val_accuracy: 0.4181 Epoch 54/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2469 - accuracy: 0.4228 - val_loss: 1.2566 - val_accuracy: 0.4181 Epoch 55/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2464 - accuracy: 0.4291 - val_loss: 1.2562 - val_accuracy: 0.4181 Epoch 56/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2455 - accuracy: 0.4203 - val_loss: 1.2557 - val_accuracy: 0.4181 Epoch 57/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2450 - accuracy: 0.4266 - val_loss: 1.2553 - val_accuracy: 0.4181 Epoch 58/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2443 - accuracy: 0.4228 - val_loss: 1.2549 - val_accuracy: 0.4181 Epoch 59/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2438 - accuracy: 0.4354 - val_loss: 1.2546 - val_accuracy: 0.4123 Epoch 60/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2430 - accuracy: 0.4228 - val_loss: 1.2542 - val_accuracy: 0.4181 Epoch 61/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2425 - accuracy: 0.4203 - val_loss: 1.2538 - val_accuracy: 0.4181 Epoch 62/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2419 - accuracy: 0.4253 - val_loss: 1.2534 - val_accuracy: 0.4181 Epoch 63/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2413 - accuracy: 0.4253 - val_loss: 1.2530 - val_accuracy: 0.4181 Epoch 64/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2408 - accuracy: 0.4316 - val_loss: 1.2527 - val_accuracy: 0.4181 Epoch 65/100 25/25 [==============================] - 0s 2ms/step - loss: 1.2403 - accuracy: 0.4253 - val_loss: 1.2523 - val_accuracy: 0.4211 Epoch 66/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2396 - accuracy: 0.4329 - val_loss: 1.2520 - val_accuracy: 0.4211 Epoch 67/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2392 - accuracy: 0.4241 - val_loss: 1.2517 - val_accuracy: 0.4181 Epoch 68/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2387 - accuracy: 0.4291 - val_loss: 1.2513 - val_accuracy: 0.4211 Epoch 69/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2381 - accuracy: 0.4529 - val_loss: 1.2511 - val_accuracy: 0.4181 Epoch 70/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2377 - accuracy: 0.4228 - val_loss: 1.2507 - val_accuracy: 0.4152 Epoch 71/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2374 - accuracy: 0.4291 - val_loss: 1.2504 - val_accuracy: 0.4240 Epoch 72/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2366 - accuracy: 0.4329 - val_loss: 1.2501 - val_accuracy: 0.4211 Epoch 73/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2361 - accuracy: 0.4404 - val_loss: 1.2498 - val_accuracy: 0.4181 Epoch 74/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2360 - accuracy: 0.4266 - val_loss: 1.2495 - val_accuracy: 0.4181 Epoch 75/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2355 - accuracy: 0.4266 - val_loss: 1.2492 - val_accuracy: 0.4327 Epoch 76/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2350 - accuracy: 0.4316 - val_loss: 1.2489 - val_accuracy: 0.4327 Epoch 77/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2344 - accuracy: 0.4504 - val_loss: 1.2486 - val_accuracy: 0.4298 Epoch 78/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2341 - accuracy: 0.4379 - val_loss: 1.2483 - val_accuracy: 0.4298 Epoch 79/100 25/25 [==============================] - 0s 4ms/step - loss: 1.2335 - accuracy: 0.4567 - val_loss: 1.2480 - val_accuracy: 0.4181 Epoch 80/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2331 - accuracy: 0.4504 - val_loss: 1.2477 - val_accuracy: 0.4152 Epoch 81/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2328 - accuracy: 0.4442 - val_loss: 1.2475 - val_accuracy: 0.4211 Epoch 82/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2326 - accuracy: 0.4291 - val_loss: 1.2471 - val_accuracy: 0.4298 Epoch 83/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2321 - accuracy: 0.4366 - val_loss: 1.2468 - val_accuracy: 0.4327 Epoch 84/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2316 - accuracy: 0.4329 - val_loss: 1.2464 - val_accuracy: 0.4415 Epoch 85/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2311 - accuracy: 0.4442 - val_loss: 1.2461 - val_accuracy: 0.4503 Epoch 86/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2306 - accuracy: 0.4529 - val_loss: 1.2458 - val_accuracy: 0.4474 Epoch 87/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2302 - accuracy: 0.4479 - val_loss: 1.2455 - val_accuracy: 0.4474 Epoch 88/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2298 - accuracy: 0.4454 - val_loss: 1.2453 - val_accuracy: 0.4474 Epoch 89/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2294 - accuracy: 0.4504 - val_loss: 1.2450 - val_accuracy: 0.4474 Epoch 90/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2290 - accuracy: 0.4504 - val_loss: 1.2446 - val_accuracy: 0.4474 Epoch 91/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2286 - accuracy: 0.4617 - val_loss: 1.2444 - val_accuracy: 0.4444 Epoch 92/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2282 - accuracy: 0.4517 - val_loss: 1.2440 - val_accuracy: 0.4444 Epoch 93/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2279 - accuracy: 0.4467 - val_loss: 1.2437 - val_accuracy: 0.4561 Epoch 94/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2272 - accuracy: 0.4467 - val_loss: 1.2433 - val_accuracy: 0.4649 Epoch 95/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2270 - accuracy: 0.4705 - val_loss: 1.2431 - val_accuracy: 0.4503 Epoch 96/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2266 - accuracy: 0.4555 - val_loss: 1.2427 - val_accuracy: 0.4591 Epoch 97/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2261 - accuracy: 0.4655 - val_loss: 1.2424 - val_accuracy: 0.4591 Epoch 98/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2257 - accuracy: 0.4680 - val_loss: 1.2421 - val_accuracy: 0.4591 Epoch 99/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2254 - accuracy: 0.4768 - val_loss: 1.2418 - val_accuracy: 0.4591 Epoch 100/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2248 - accuracy: 0.4567 - val_loss: 1.2414 - val_accuracy: 0.4649
# Display available metrics from training history
print(training_model.history.keys())
dict_keys(['loss', 'accuracy', 'val_loss', 'val_accuracy'])
# Visualize training and validation loss over epochs
plt.plot(training_model.history['loss'], label='Training Loss')
plt.plot(training_model.history['val_loss'], label='Validation Loss')
plt.title('Model Loss Progression')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend(loc='upper left')
plt.show()
# Visualizing training and validation accuracy over epochs
plt.plot(training_model.history['accuracy'], label='Training Accuracy')
plt.plot(training_model.history['val_accuracy'], label='Validation Accuracy')
plt.title('Model Accuracy Progression')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend(loc='upper left')
plt.show()
# Import necessary modules
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, BatchNormalization
from tensorflow.keras.optimizers import Adam
# Initialize the neural network model
classifier_model = Sequential()
# Input Layer with Batch Normalization and Dropout
classifier_model.add(Dense(units=9, activation='relu', input_dim=11, kernel_initializer='he_normal'))
classifier_model.add(BatchNormalization())
classifier_model.add(Dropout(0.1))
# Hidden Layer with Batch Normalization and Dropout
classifier_model.add(Dense(units=9, activation='relu', kernel_initializer='he_normal'))
classifier_model.add(BatchNormalization())
classifier_model.add(Dropout(0.1))
# Output Layer
classifier_model.add(Dense(units=9, activation='softmax', kernel_initializer='glorot_uniform'))
# Compile the model using Adam optimizer
classifier_model.compile(optimizer=Adam(learning_rate=0.001), loss='categorical_crossentropy', metrics=['accuracy'])
# Display the model summary
classifier_model.summary()
Model: "sequential_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
dense_3 (Dense) (None, 9) 108
batch_normalization (BatchN (None, 9) 36
ormalization)
dropout (Dropout) (None, 9) 0
dense_4 (Dense) (None, 9) 90
batch_normalization_1 (Batc (None, 9) 36
hNormalization)
dropout_1 (Dropout) (None, 9) 0
dense_5 (Dense) (None, 9) 90
=================================================================
Total params: 360
Trainable params: 324
Non-trainable params: 36
_________________________________________________________________
# Train the model
training_model = classifier_model.fit(
X_train_scaled, y_train_one_hot,
validation_data=(X_test_scaled, dummy_y_test),
epochs=100, batch_size=32
)
Epoch 1/100 25/25 [==============================] - 1s 10ms/step - loss: 2.4407 - accuracy: 0.1481 - val_loss: 2.0491 - val_accuracy: 0.5029 Epoch 2/100 25/25 [==============================] - 0s 4ms/step - loss: 2.2500 - accuracy: 0.2171 - val_loss: 1.9288 - val_accuracy: 0.5439 Epoch 3/100 25/25 [==============================] - 0s 4ms/step - loss: 2.0950 - accuracy: 0.2836 - val_loss: 1.8277 - val_accuracy: 0.5556 Epoch 4/100 25/25 [==============================] - 0s 4ms/step - loss: 1.9682 - accuracy: 0.3476 - val_loss: 1.7338 - val_accuracy: 0.5585 Epoch 5/100 25/25 [==============================] - 0s 4ms/step - loss: 1.8980 - accuracy: 0.4053 - val_loss: 1.6529 - val_accuracy: 0.5673 Epoch 6/100 25/25 [==============================] - 0s 5ms/step - loss: 1.8187 - accuracy: 0.4203 - val_loss: 1.5772 - val_accuracy: 0.5556 Epoch 7/100 25/25 [==============================] - 0s 4ms/step - loss: 1.7329 - accuracy: 0.4642 - val_loss: 1.5099 - val_accuracy: 0.5556 Epoch 8/100 25/25 [==============================] - 0s 3ms/step - loss: 1.6522 - accuracy: 0.4843 - val_loss: 1.4482 - val_accuracy: 0.5643 Epoch 9/100 25/25 [==============================] - 0s 3ms/step - loss: 1.5811 - accuracy: 0.4843 - val_loss: 1.3957 - val_accuracy: 0.5731 Epoch 10/100 25/25 [==============================] - 0s 3ms/step - loss: 1.5336 - accuracy: 0.4969 - val_loss: 1.3494 - val_accuracy: 0.5789 Epoch 11/100 25/25 [==============================] - 0s 3ms/step - loss: 1.4710 - accuracy: 0.4918 - val_loss: 1.3122 - val_accuracy: 0.5789 Epoch 12/100 25/25 [==============================] - 0s 4ms/step - loss: 1.4177 - accuracy: 0.5420 - val_loss: 1.2781 - val_accuracy: 0.5819 Epoch 13/100 25/25 [==============================] - 0s 3ms/step - loss: 1.3705 - accuracy: 0.5307 - val_loss: 1.2459 - val_accuracy: 0.5877 Epoch 14/100 25/25 [==============================] - 0s 3ms/step - loss: 1.3475 - accuracy: 0.5270 - val_loss: 1.2222 - val_accuracy: 0.5936 Epoch 15/100 25/25 [==============================] - 0s 3ms/step - loss: 1.3104 - accuracy: 0.5496 - val_loss: 1.2052 - val_accuracy: 0.5965 Epoch 16/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2937 - accuracy: 0.5044 - val_loss: 1.1868 - val_accuracy: 0.5994 Epoch 17/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2470 - accuracy: 0.5307 - val_loss: 1.1676 - val_accuracy: 0.5936 Epoch 18/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2435 - accuracy: 0.5546 - val_loss: 1.1495 - val_accuracy: 0.6023 Epoch 19/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2246 - accuracy: 0.5358 - val_loss: 1.1361 - val_accuracy: 0.5994 Epoch 20/100 25/25 [==============================] - 0s 3ms/step - loss: 1.2041 - accuracy: 0.5483 - val_loss: 1.1245 - val_accuracy: 0.6053 Epoch 21/100 25/25 [==============================] - 0s 3ms/step - loss: 1.1873 - accuracy: 0.5345 - val_loss: 1.1153 - val_accuracy: 0.6023 Epoch 22/100 25/25 [==============================] - 0s 3ms/step - loss: 1.1755 - accuracy: 0.5445 - val_loss: 1.1105 - val_accuracy: 0.5994 Epoch 23/100 25/25 [==============================] - 0s 3ms/step - loss: 1.1655 - accuracy: 0.5307 - val_loss: 1.1055 - val_accuracy: 0.6023 Epoch 24/100 25/25 [==============================] - 0s 3ms/step - loss: 1.1210 - accuracy: 0.5458 - val_loss: 1.0959 - val_accuracy: 0.6053 Epoch 25/100 25/25 [==============================] - 0s 3ms/step - loss: 1.1158 - accuracy: 0.5671 - val_loss: 1.0831 - val_accuracy: 0.6023 Epoch 26/100 25/25 [==============================] - 0s 3ms/step - loss: 1.1201 - accuracy: 0.5583 - val_loss: 1.0756 - val_accuracy: 0.6023 Epoch 27/100 25/25 [==============================] - 0s 4ms/step - loss: 1.1004 - accuracy: 0.5596 - val_loss: 1.0721 - val_accuracy: 0.6023 Epoch 28/100 25/25 [==============================] - 0s 3ms/step - loss: 1.0982 - accuracy: 0.5521 - val_loss: 1.0707 - val_accuracy: 0.5965 Epoch 29/100 25/25 [==============================] - 0s 3ms/step - loss: 1.0915 - accuracy: 0.5445 - val_loss: 1.0683 - val_accuracy: 0.6023 Epoch 30/100 25/25 [==============================] - 0s 3ms/step - loss: 1.0906 - accuracy: 0.5558 - val_loss: 1.0662 - val_accuracy: 0.6023 Epoch 31/100 25/25 [==============================] - 0s 3ms/step - loss: 1.0781 - accuracy: 0.5521 - val_loss: 1.0607 - val_accuracy: 0.6023 Epoch 32/100 25/25 [==============================] - 0s 3ms/step - loss: 1.0806 - accuracy: 0.5483 - val_loss: 1.0597 - val_accuracy: 0.6053 Epoch 33/100 25/25 [==============================] - 0s 4ms/step - loss: 1.0369 - accuracy: 0.5621 - val_loss: 1.0528 - val_accuracy: 0.6053 Epoch 34/100 25/25 [==============================] - 0s 3ms/step - loss: 1.0727 - accuracy: 0.5445 - val_loss: 1.0506 - val_accuracy: 0.6082 Epoch 35/100 25/25 [==============================] - 0s 4ms/step - loss: 1.0516 - accuracy: 0.5596 - val_loss: 1.0471 - val_accuracy: 0.6111 Epoch 36/100 25/25 [==============================] - 0s 4ms/step - loss: 1.0582 - accuracy: 0.5621 - val_loss: 1.0461 - val_accuracy: 0.6053 Epoch 37/100 25/25 [==============================] - 0s 3ms/step - loss: 1.0654 - accuracy: 0.5521 - val_loss: 1.0427 - val_accuracy: 0.5965 Epoch 38/100 25/25 [==============================] - 0s 4ms/step - loss: 1.0411 - accuracy: 0.5659 - val_loss: 1.0398 - val_accuracy: 0.5906 Epoch 39/100 25/25 [==============================] - 0s 5ms/step - loss: 1.0617 - accuracy: 0.5546 - val_loss: 1.0370 - val_accuracy: 0.5994 Epoch 40/100 25/25 [==============================] - 0s 4ms/step - loss: 1.0316 - accuracy: 0.5721 - val_loss: 1.0355 - val_accuracy: 0.6053 Epoch 41/100 25/25 [==============================] - 0s 4ms/step - loss: 1.0250 - accuracy: 0.5684 - val_loss: 1.0326 - val_accuracy: 0.6111 Epoch 42/100 25/25 [==============================] - 0s 4ms/step - loss: 1.0462 - accuracy: 0.5408 - val_loss: 1.0304 - val_accuracy: 0.6170 Epoch 43/100 25/25 [==============================] - 0s 4ms/step - loss: 1.0330 - accuracy: 0.5596 - val_loss: 1.0295 - val_accuracy: 0.6053 Epoch 44/100 25/25 [==============================] - 0s 3ms/step - loss: 1.0391 - accuracy: 0.5508 - val_loss: 1.0282 - val_accuracy: 0.6023 Epoch 45/100 25/25 [==============================] - 0s 4ms/step - loss: 1.0268 - accuracy: 0.5759 - val_loss: 1.0304 - val_accuracy: 0.5936 Epoch 46/100 25/25 [==============================] - 0s 4ms/step - loss: 1.0189 - accuracy: 0.5483 - val_loss: 1.0301 - val_accuracy: 0.6111 Epoch 47/100 25/25 [==============================] - 0s 4ms/step - loss: 1.0481 - accuracy: 0.5521 - val_loss: 1.0287 - val_accuracy: 0.6140 Epoch 48/100 25/25 [==============================] - 0s 3ms/step - loss: 1.0340 - accuracy: 0.5646 - val_loss: 1.0237 - val_accuracy: 0.6111 Epoch 49/100 25/25 [==============================] - 0s 3ms/step - loss: 0.9997 - accuracy: 0.5834 - val_loss: 1.0232 - val_accuracy: 0.6082 Epoch 50/100 25/25 [==============================] - 0s 3ms/step - loss: 1.0211 - accuracy: 0.5684 - val_loss: 1.0243 - val_accuracy: 0.6199 Epoch 51/100 25/25 [==============================] - 0s 4ms/step - loss: 1.0163 - accuracy: 0.5571 - val_loss: 1.0246 - val_accuracy: 0.6199 Epoch 52/100 25/25 [==============================] - 0s 4ms/step - loss: 1.0156 - accuracy: 0.5533 - val_loss: 1.0215 - val_accuracy: 0.6170 Epoch 53/100 25/25 [==============================] - 0s 4ms/step - loss: 1.0413 - accuracy: 0.5533 - val_loss: 1.0208 - val_accuracy: 0.6170 Epoch 54/100 25/25 [==============================] - 0s 4ms/step - loss: 0.9914 - accuracy: 0.5847 - val_loss: 1.0195 - val_accuracy: 0.6140 Epoch 55/100 25/25 [==============================] - 0s 4ms/step - loss: 1.0184 - accuracy: 0.5546 - val_loss: 1.0156 - val_accuracy: 0.6053 Epoch 56/100 25/25 [==============================] - 0s 3ms/step - loss: 1.0251 - accuracy: 0.5508 - val_loss: 1.0143 - val_accuracy: 0.6140 Epoch 57/100 25/25 [==============================] - 0s 3ms/step - loss: 1.0152 - accuracy: 0.5508 - val_loss: 1.0171 - val_accuracy: 0.6053 Epoch 58/100 25/25 [==============================] - 0s 4ms/step - loss: 1.0132 - accuracy: 0.5420 - val_loss: 1.0178 - val_accuracy: 0.6053 Epoch 59/100 25/25 [==============================] - 0s 4ms/step - loss: 1.0298 - accuracy: 0.5521 - val_loss: 1.0188 - val_accuracy: 0.6140 Epoch 60/100 25/25 [==============================] - 0s 4ms/step - loss: 1.0062 - accuracy: 0.5671 - val_loss: 1.0217 - val_accuracy: 0.5994 Epoch 61/100 25/25 [==============================] - 0s 4ms/step - loss: 0.9882 - accuracy: 0.5684 - val_loss: 1.0203 - val_accuracy: 0.6287 Epoch 62/100 25/25 [==============================] - 0s 4ms/step - loss: 1.0067 - accuracy: 0.5684 - val_loss: 1.0214 - val_accuracy: 0.6111 Epoch 63/100 25/25 [==============================] - 0s 5ms/step - loss: 1.0091 - accuracy: 0.5596 - val_loss: 1.0191 - val_accuracy: 0.6140 Epoch 64/100 25/25 [==============================] - 0s 5ms/step - loss: 1.0060 - accuracy: 0.5646 - val_loss: 1.0161 - val_accuracy: 0.6082 Epoch 65/100 25/25 [==============================] - 0s 3ms/step - loss: 1.0050 - accuracy: 0.5634 - val_loss: 1.0142 - val_accuracy: 0.6199 Epoch 66/100 25/25 [==============================] - 0s 3ms/step - loss: 0.9908 - accuracy: 0.5634 - val_loss: 1.0158 - val_accuracy: 0.6111 Epoch 67/100 25/25 [==============================] - 0s 3ms/step - loss: 1.0268 - accuracy: 0.5408 - val_loss: 1.0145 - val_accuracy: 0.6199 Epoch 68/100 25/25 [==============================] - 0s 3ms/step - loss: 1.0043 - accuracy: 0.5646 - val_loss: 1.0162 - val_accuracy: 0.6082 Epoch 69/100 25/25 [==============================] - 0s 3ms/step - loss: 1.0020 - accuracy: 0.5508 - val_loss: 1.0154 - val_accuracy: 0.6170 Epoch 70/100 25/25 [==============================] - 0s 4ms/step - loss: 1.0146 - accuracy: 0.5508 - val_loss: 1.0151 - val_accuracy: 0.6140 Epoch 71/100 25/25 [==============================] - 0s 3ms/step - loss: 0.9736 - accuracy: 0.5709 - val_loss: 1.0136 - val_accuracy: 0.6140 Epoch 72/100 25/25 [==============================] - 0s 3ms/step - loss: 0.9846 - accuracy: 0.5634 - val_loss: 1.0146 - val_accuracy: 0.6170 Epoch 73/100 25/25 [==============================] - 0s 3ms/step - loss: 1.0316 - accuracy: 0.5483 - val_loss: 1.0136 - val_accuracy: 0.6228 Epoch 74/100 25/25 [==============================] - 0s 4ms/step - loss: 0.9873 - accuracy: 0.5621 - val_loss: 1.0154 - val_accuracy: 0.6170 Epoch 75/100 25/25 [==============================] - 0s 4ms/step - loss: 1.0020 - accuracy: 0.5671 - val_loss: 1.0119 - val_accuracy: 0.6170 Epoch 76/100 25/25 [==============================] - 0s 3ms/step - loss: 0.9954 - accuracy: 0.5634 - val_loss: 1.0105 - val_accuracy: 0.6199 Epoch 77/100 25/25 [==============================] - 0s 3ms/step - loss: 0.9963 - accuracy: 0.5496 - val_loss: 1.0088 - val_accuracy: 0.6257 Epoch 78/100 25/25 [==============================] - 0s 3ms/step - loss: 0.9816 - accuracy: 0.5496 - val_loss: 1.0065 - val_accuracy: 0.6199 Epoch 79/100 25/25 [==============================] - 0s 4ms/step - loss: 1.0083 - accuracy: 0.5445 - val_loss: 1.0067 - val_accuracy: 0.6228 Epoch 80/100 25/25 [==============================] - 0s 5ms/step - loss: 1.0042 - accuracy: 0.5471 - val_loss: 1.0070 - val_accuracy: 0.6140 Epoch 81/100 25/25 [==============================] - 0s 3ms/step - loss: 0.9862 - accuracy: 0.5546 - val_loss: 1.0086 - val_accuracy: 0.6111 Epoch 82/100 25/25 [==============================] - 0s 3ms/step - loss: 1.0018 - accuracy: 0.5646 - val_loss: 1.0118 - val_accuracy: 0.6140 Epoch 83/100 25/25 [==============================] - 0s 3ms/step - loss: 0.9982 - accuracy: 0.5508 - val_loss: 1.0090 - val_accuracy: 0.6170 Epoch 84/100 25/25 [==============================] - 0s 4ms/step - loss: 0.9983 - accuracy: 0.5583 - val_loss: 1.0119 - val_accuracy: 0.6140 Epoch 85/100 25/25 [==============================] - 0s 3ms/step - loss: 1.0025 - accuracy: 0.5621 - val_loss: 1.0093 - val_accuracy: 0.6170 Epoch 86/100 25/25 [==============================] - 0s 3ms/step - loss: 0.9894 - accuracy: 0.5583 - val_loss: 1.0104 - val_accuracy: 0.6199 Epoch 87/100 25/25 [==============================] - 0s 4ms/step - loss: 0.9892 - accuracy: 0.5772 - val_loss: 1.0127 - val_accuracy: 0.6199 Epoch 88/100 25/25 [==============================] - 0s 3ms/step - loss: 0.9819 - accuracy: 0.5772 - val_loss: 1.0146 - val_accuracy: 0.6199 Epoch 89/100 25/25 [==============================] - 0s 3ms/step - loss: 0.9898 - accuracy: 0.5471 - val_loss: 1.0108 - val_accuracy: 0.6287 Epoch 90/100 25/25 [==============================] - 0s 3ms/step - loss: 0.9745 - accuracy: 0.5583 - val_loss: 1.0139 - val_accuracy: 0.6228 Epoch 91/100 25/25 [==============================] - 0s 3ms/step - loss: 0.9992 - accuracy: 0.5734 - val_loss: 1.0152 - val_accuracy: 0.6170 Epoch 92/100 25/25 [==============================] - 0s 3ms/step - loss: 0.9630 - accuracy: 0.5809 - val_loss: 1.0118 - val_accuracy: 0.6140 Epoch 93/100 25/25 [==============================] - 0s 3ms/step - loss: 0.9760 - accuracy: 0.5496 - val_loss: 1.0158 - val_accuracy: 0.6111 Epoch 94/100 25/25 [==============================] - 0s 4ms/step - loss: 0.9782 - accuracy: 0.5847 - val_loss: 1.0167 - val_accuracy: 0.6228 Epoch 95/100 25/25 [==============================] - 0s 4ms/step - loss: 0.9890 - accuracy: 0.5684 - val_loss: 1.0148 - val_accuracy: 0.6228 Epoch 96/100 25/25 [==============================] - 0s 4ms/step - loss: 0.9780 - accuracy: 0.5546 - val_loss: 1.0173 - val_accuracy: 0.6140 Epoch 97/100 25/25 [==============================] - 0s 4ms/step - loss: 0.9822 - accuracy: 0.5646 - val_loss: 1.0248 - val_accuracy: 0.6111 Epoch 98/100 25/25 [==============================] - 0s 4ms/step - loss: 0.9809 - accuracy: 0.5822 - val_loss: 1.0237 - val_accuracy: 0.6111 Epoch 99/100 25/25 [==============================] - 0s 4ms/step - loss: 0.9696 - accuracy: 0.5659 - val_loss: 1.0232 - val_accuracy: 0.6111 Epoch 100/100 25/25 [==============================] - 0s 4ms/step - loss: 0.9804 - accuracy: 0.5696 - val_loss: 1.0240 - val_accuracy: 0.6140
# Plot training vs validation loss
plt.plot(training_model.history['loss'], label='Training Loss')
plt.plot(training_model.history['val_loss'], label='Validation Loss')
plt.title('Model Loss Over Epochs')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend(loc='upper left')
plt.show()
# Plot training vs validation accuracy
plt.plot(training_model.history['accuracy'], label='Training Accuracy')
plt.plot(training_model.history['val_accuracy'], label='Validation Accuracy')
plt.title('Model Accuracy Over Epochs')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend(loc='upper left')
plt.show()
CONTEXT: A Recognising multi-digit numbers in photographs captured at street level is an important component of modern-day map making. A classic example of a corpus of such street-level photographs is Google’s Street View imagery composed of hundreds of millions of geo-located 360-degree panoramic images. The ability to automatically transcribe an address number from a geo-located patch of pixels and associate the transcribed number with a known street address helps pinpoint, with a high degree of accuracy, the location of the building it represents. More broadly, recognising numbers in photographs is a problem of interest to the optical character recognition community. While OCR on constrained domains like document processing is well studied, arbitrary multi-character text recognition in photographs is still highly challenging. This difficulty arises due to the wide variability in the visual appearance of text in the wild on account of a large range of fonts, colours, styles, orientations, and character arrangements. The recognition problem is further complicated by environmental factors such as lighting, shadows, specularity, and occlusions as well as by image acquisition factors such as resolution, motion, and focus blurs. In this project, we will use the dataset with images centred around a single digit (many of the images do contain some distractors at the sides). Although we are taking a sample of the data which is simpler, it is more complex than MNIST because of the distractors.
DATA DESCRIPTION: The SVHN is a real-world image dataset for developing machine learning and object recognition algorithms with the minimal requirement on data formatting but comes from a significantly harder, unsolved, real-world problem (recognising digits and numbers in natural scene images). SVHN is obtained from house numbers in Google Street View images.
PROJECT OBJECTIVE: To build a digit classifier on the SVHN (Street View Housing Number) dataset.
# Required Library
import h5py
# Load the SVHN dataset from the HDF5 file
# svhn_data = h5py.File('/content/drive/My Drive/PGP: AIML University Of Texas/Assignment - Introduction to Neural Networks/Autonomous_Vehicles_SVHN_single_grey1.h5', 'r')
svhn_data = h5py.File('Autonomous_Vehicles_SVHN_single_grey1.h5', 'r')
# Display dataset structure
svhn_data
<HDF5 file "Autonomous_Vehicles_SVHN_single_grey1.h5" (mode r)>
# Retrieve keys from the HDF5 file
list(svhn_data.keys())
['X_test', 'X_train', 'X_val', 'y_test', 'y_train', 'y_val']
# Extract training and testing data from the HDF5 file
train_images = svhn_data['X_train'][:]
train_labels = svhn_data['y_train'][:]
test_images = svhn_data['X_test'][:]
test_labels = svhn_data['y_test'][:]
# Display training labels
train_labels
array([2, 6, 7, ..., 7, 0, 4], dtype=uint8)
# Print shape of the training and test sets
print(train_images.shape, train_labels.shape)
print(test_images.shape, test_labels.shape)
(42000, 32, 32) (42000,) (18000, 32, 32) (18000,)
import matplotlib.pyplot as plt
# Display first 10 images with labels
for i in range(10):
image = train_images[i]
plt.figure(i)
plt.imshow(image, cmap='gray') # Ensure grayscale display
plt.xlabel(str(train_labels[i])) # Convert label to string if necessary
plt.show()
# Visualizing the first 10 images in the dataset and their labels
plt.figure(figsize=(10, 1))
for i in range(10):
plt.subplot(1, 10, i + 1)
plt.imshow(train_images[i], cmap='gray') # Ensure grayscale display
plt.axis('off')
plt.show()
print('Labels for the above images:', train_labels[:10])
Labels for the above images: [2 6 7 4 4 0 3 0 7 3]
# Shape of the images and the first image
print("Shape:", train_images[0].shape)
print()
print("First image:\n", train_images[0])
Shape: (32, 32) First image: [[ 33.0704 30.2601 26.852 ... 71.4471 58.2204 42.9939] [ 25.2283 25.5533 29.9765 ... 113.0209 103.3639 84.2949] [ 26.2775 22.6137 40.4763 ... 113.3028 121.775 115.4228] ... [ 28.5502 36.212 45.0801 ... 24.1359 25.0927 26.0603] [ 38.4352 26.4733 23.2717 ... 28.1094 29.4683 30.0661] [ 50.2984 26.0773 24.0389 ... 49.6682 50.853 53.0377]]
# Reshaping the dataset to flatten them. Convert 2D images into 1D arrays
train_images = train_images.reshape(train_images.shape[0], -1)
test_images = test_images.reshape(test_images.shape[0], -1)
# Check Shape of the images and the first image
print("Shape:", train_images[0].shape)
print()
print("First image:\n", train_images[0])
Shape: (1024,) First image: [33.0704 30.2601 26.852 ... 49.6682 50.853 53.0377]
print("train_images:", train_images.shape, " test_images:", test_images.shape,
" y_train:", train_labels.shape, " y_test:", test_labels.shape)
train_images: (42000, 1024) test_images: (18000, 1024) y_train: (42000,) y_test: (18000,)
# Normalize inputs
train_images = train_images / 255.0
test_images = test_images / 255.0
print("Max value in train_images:", train_images.max())
print("Min value in train_images:", train_images.min())
Max value in train_images: 0.9999 Min value in train_images: 0.0
# One-hot encode output labels
train_labels = to_categorical(train_labels)
test_labels = to_categorical(test_labels)
print(train_labels.shape)
print(test_labels.shape)
(42000, 10) (18000, 10)
print("Number of classes in train_labels:", train_labels.shape[1])
print("Number of classes in test_labels:", test_labels.shape[1])
Number of classes in train_labels: 10 Number of classes in test_labels: 10
# Import necessary modules
from tensorflow.keras import losses, optimizers
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, BatchNormalization
# Define the function to create the model
def build_neural_network():
model = Sequential([
Dense(256, activation='relu', input_shape=(1024,)),
Dense(128, activation='relu'),
Dropout(0.2),
Dense(64, activation='relu'),
Dense(64, activation='relu'),
Dense(32, activation='relu'),
BatchNormalization(),
Dense(10, activation='softmax')
])
# Initialize Adam optimizer with a specific learning rate
adam_optimizer = optimizers.Adam(learning_rate=0.0005)
# Compile the model with categorical cross-entropy loss and accuracy metric
model.compile(optimizer=adam_optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
return model
# Instantiate and summarize the model
model_v2 = build_neural_network()
model_v2.summary()
Model: "sequential_2"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
dense_6 (Dense) (None, 256) 262400
dense_7 (Dense) (None, 128) 32896
dropout_2 (Dropout) (None, 128) 0
dense_8 (Dense) (None, 64) 8256
dense_9 (Dense) (None, 64) 4160
dense_10 (Dense) (None, 32) 2080
batch_normalization_2 (Batc (None, 32) 128
hNormalization)
dense_11 (Dense) (None, 10) 330
=================================================================
Total params: 310,250
Trainable params: 310,186
Non-trainable params: 64
_________________________________________________________________
# Train the model with updated variable names
history_v2 = model_v2.fit(
train_images, train_labels,
epochs=30,
validation_split=0.2,
batch_size=128,
verbose=1
)
Epoch 1/30 263/263 [==============================] - 3s 8ms/step - loss: 2.3473 - accuracy: 0.1033 - val_loss: 2.3027 - val_accuracy: 0.1082 Epoch 2/30 263/263 [==============================] - 2s 7ms/step - loss: 2.2768 - accuracy: 0.1293 - val_loss: 2.2495 - val_accuracy: 0.1867 Epoch 3/30 263/263 [==============================] - 2s 7ms/step - loss: 1.8116 - accuracy: 0.3579 - val_loss: 1.6004 - val_accuracy: 0.4800 Epoch 4/30 263/263 [==============================] - 2s 7ms/step - loss: 1.3838 - accuracy: 0.5406 - val_loss: 1.2527 - val_accuracy: 0.5980 Epoch 5/30 263/263 [==============================] - 2s 7ms/step - loss: 1.2386 - accuracy: 0.5937 - val_loss: 1.1914 - val_accuracy: 0.6094 Epoch 6/30 263/263 [==============================] - 2s 7ms/step - loss: 1.1558 - accuracy: 0.6251 - val_loss: 1.1460 - val_accuracy: 0.6245 Epoch 7/30 263/263 [==============================] - 2s 7ms/step - loss: 1.0942 - accuracy: 0.6472 - val_loss: 1.0439 - val_accuracy: 0.6626 Epoch 8/30 263/263 [==============================] - 2s 7ms/step - loss: 1.0401 - accuracy: 0.6659 - val_loss: 0.9750 - val_accuracy: 0.6932 Epoch 9/30 263/263 [==============================] - 2s 8ms/step - loss: 0.9976 - accuracy: 0.6829 - val_loss: 0.9236 - val_accuracy: 0.7056 Epoch 10/30 263/263 [==============================] - 2s 7ms/step - loss: 0.9745 - accuracy: 0.6875 - val_loss: 0.9240 - val_accuracy: 0.7062 Epoch 11/30 263/263 [==============================] - 2s 6ms/step - loss: 0.9326 - accuracy: 0.7028 - val_loss: 0.9154 - val_accuracy: 0.7079 Epoch 12/30 263/263 [==============================] - 2s 7ms/step - loss: 0.9134 - accuracy: 0.7093 - val_loss: 0.8265 - val_accuracy: 0.7402 Epoch 13/30 263/263 [==============================] - 2s 6ms/step - loss: 0.8854 - accuracy: 0.7160 - val_loss: 0.8710 - val_accuracy: 0.7318 Epoch 14/30 263/263 [==============================] - 2s 7ms/step - loss: 0.8738 - accuracy: 0.7235 - val_loss: 0.8736 - val_accuracy: 0.7273 Epoch 15/30 263/263 [==============================] - 2s 7ms/step - loss: 0.8596 - accuracy: 0.7279 - val_loss: 0.8453 - val_accuracy: 0.7285 Epoch 16/30 263/263 [==============================] - 2s 7ms/step - loss: 0.8380 - accuracy: 0.7329 - val_loss: 0.8115 - val_accuracy: 0.7452 Epoch 17/30 263/263 [==============================] - 2s 7ms/step - loss: 0.8216 - accuracy: 0.7406 - val_loss: 0.7739 - val_accuracy: 0.7626 Epoch 18/30 263/263 [==============================] - 2s 7ms/step - loss: 0.8040 - accuracy: 0.7448 - val_loss: 0.8109 - val_accuracy: 0.7452 Epoch 19/30 263/263 [==============================] - 2s 6ms/step - loss: 0.7931 - accuracy: 0.7483 - val_loss: 0.7575 - val_accuracy: 0.7655 Epoch 20/30 263/263 [==============================] - 2s 6ms/step - loss: 0.7792 - accuracy: 0.7513 - val_loss: 0.8169 - val_accuracy: 0.7487 Epoch 21/30 263/263 [==============================] - 2s 7ms/step - loss: 0.7762 - accuracy: 0.7530 - val_loss: 0.7545 - val_accuracy: 0.7660 Epoch 22/30 263/263 [==============================] - 2s 6ms/step - loss: 0.7630 - accuracy: 0.7574 - val_loss: 0.7802 - val_accuracy: 0.7573 Epoch 23/30 263/263 [==============================] - 2s 6ms/step - loss: 0.7521 - accuracy: 0.7617 - val_loss: 0.7936 - val_accuracy: 0.7469 Epoch 24/30 263/263 [==============================] - 2s 7ms/step - loss: 0.7374 - accuracy: 0.7654 - val_loss: 0.7288 - val_accuracy: 0.7702 Epoch 25/30 263/263 [==============================] - 2s 7ms/step - loss: 0.7301 - accuracy: 0.7672 - val_loss: 0.7799 - val_accuracy: 0.7485 Epoch 26/30 263/263 [==============================] - 2s 6ms/step - loss: 0.7188 - accuracy: 0.7710 - val_loss: 0.7257 - val_accuracy: 0.7758 Epoch 27/30 263/263 [==============================] - 2s 7ms/step - loss: 0.7267 - accuracy: 0.7668 - val_loss: 0.7239 - val_accuracy: 0.7702 Epoch 28/30 263/263 [==============================] - 2s 6ms/step - loss: 0.7250 - accuracy: 0.7676 - val_loss: 0.7690 - val_accuracy: 0.7626 Epoch 29/30 263/263 [==============================] - 2s 7ms/step - loss: 0.7064 - accuracy: 0.7749 - val_loss: 0.7720 - val_accuracy: 0.7545 Epoch 30/30 263/263 [==============================] - 2s 6ms/step - loss: 0.6956 - accuracy: 0.7769 - val_loss: 0.7211 - val_accuracy: 0.7724
# Generate predictions from the trained model
predicted_labels = model_v2.predict(test_images)
# Convert probabilities to class labels
predicted_labels = np.argmax(predicted_labels, axis=-1)
# Convert actual labels from one-hot encoding to categorical values
actual_labels = np.argmax(test_labels, axis=-1)
# Import necessary evaluation metrics
from sklearn.metrics import classification_report, confusion_matrix
# Display classification performance
print("Classification Report:\n", classification_report(actual_labels, predicted_labels))
# Create a confusion matrix
conf_matrix = confusion_matrix(actual_labels, predicted_labels)
# Visualizing the confusion matrix using a heatmap
plt.figure(figsize=(8, 5))
sns.heatmap(conf_matrix, annot=True, fmt='d', cmap='Blues')
plt.ylabel('True Label')
plt.xlabel('Predicted Label')
plt.title('Confusion Matrix')
plt.show()
563/563 [==============================] - 1s 1ms/step
Classification Report:
precision recall f1-score support
0 0.81 0.81 0.81 1814
1 0.71 0.87 0.78 1828
2 0.82 0.79 0.80 1803
3 0.75 0.73 0.74 1719
4 0.81 0.83 0.82 1812
5 0.77 0.69 0.73 1768
6 0.83 0.73 0.78 1832
7 0.78 0.83 0.80 1808
8 0.71 0.74 0.73 1812
9 0.81 0.74 0.77 1804
accuracy 0.78 18000
macro avg 0.78 0.78 0.78 18000
weighted avg 0.78 0.78 0.78 18000
# Extract accuracy data from training history
training_history = history_v2.history
epochs_list = list(range(1, 31))
# Plot training and validation accuracy
plt.figure(figsize=(8, 8))
plt.plot(epochs_list, training_history['accuracy'], linestyle='--', label='Training Accuracy')
plt.plot(epochs_list, training_history['val_accuracy'], linestyle='--', label='Validation Accuracy')
# Add labels, legend, and title
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.title('Model Accuracy Over Epochs')
plt.legend()
plt.show()
# Extract loss data from training history
training_history = history_v2.history
epochs_list = list(range(1, 31))
# Plot training and validation loss
plt.figure(figsize=(8, 8))
plt.plot(epochs_list, training_history['loss'], linestyle='--', label='Training Loss')
plt.plot(epochs_list, training_history['val_loss'], linestyle='--', label='Validation Loss')
# Add labels, legend, and title
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.title('Model Loss Over Epochs')
plt.legend()
plt.show()